This patch fixes the copy_from/to_guest problem.
As Akio reported, a modularised netback driver brings dom0 down.
The following call sequence happens in gnttab_transfer() in
xen/common/grant_table.c:
gnttab_transfer()
=> steal_page()
=> assign_domain_page_cmpxchg_rel()
=> domain_page_flush()
=> domain_flush_vtlb_all() // all TLBs are flushed
...
=> __copy_to_guest_offset() // always fails to copy
The embedded netback module has no problem because its data is covered
by the pinned TR mapping, but a modularised one lies outside the TR
region. So the copy_from/to_guest issue must be solved in order to
modularise drivers.
Signed-off-by: Kouya SHIMURA <kouya@jp.fujitsu.com>
int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
+ __u64 va1, va2, pa1, pa2;
+
if (cmd == GNTTABOP_map_grant_ref) {
unsigned int i;
for (i = 0; i < count; i++) {
(struct gnttab_map_grant_ref*)uop + i);
}
}
-
- return ____HYPERVISOR_grant_table_op(cmd, uop, count);
+ va1 = (__u64)uop & PAGE_MASK;
+ pa1 = pa2 = 0;
+ if ((REGION_NUMBER(va1) == 5) &&
+ ((va1 - KERNEL_START) >= KERNEL_TR_PAGE_SIZE)) {
+ pa1 = ia64_tpa(va1);
+ if (cmd <= GNTTABOP_transfer) {
+ static uint32_t uop_size[GNTTABOP_transfer + 1] = {
+ sizeof(struct gnttab_map_grant_ref),
+ sizeof(struct gnttab_unmap_grant_ref),
+ sizeof(struct gnttab_setup_table),
+ sizeof(struct gnttab_dump_table),
+ sizeof(struct gnttab_transfer),
+ };
+ va2 = (__u64)uop + (uop_size[cmd] * count) - 1;
+ va2 &= PAGE_MASK;
+ if (va1 != va2) {
+ /* maximum size of uop is 2pages */
+ BUG_ON(va2 > va1 + PAGE_SIZE);
+ pa2 = ia64_tpa(va2);
+ }
+ }
+ }
+ return ____HYPERVISOR_grant_table_op(cmd, uop, count, pa1, pa2);
}
EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
//XXX __HYPERVISOR_grant_table_op is used for this hypercall constant.
+/*
+ * Raw grant-table hypercall.  In addition to the usual (cmd, uop, count)
+ * triple it now passes the machine addresses of the page(s) holding uop
+ * (pa1, and pa2 if uop straddles a page boundary; 0 otherwise), so the
+ * hypervisor can restore the guest TLB mapping after a flush.
+ */
static inline int
____HYPERVISOR_grant_table_op(
- unsigned int cmd, void *uop, unsigned int count)
+ unsigned int cmd, void *uop, unsigned int count,
+ unsigned long pa1, unsigned long pa2)
{
- return _hypercall3(int, grant_table_op, cmd, uop, count);
+ return _hypercall5(int, grant_table_op, cmd, uop, count, pa1, pa2);
}
int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
xen_hypercall (struct pt_regs *regs)
{
uint32_t cmd = (uint32_t)regs->r2;
+ struct vcpu *v = current;
+
+ if (cmd == __HYPERVISOR_grant_table_op) {
+ XEN_GUEST_HANDLE(void) uop;
+
+ /* Stash the guest's va (r15) and machine-address hints (r17/r18)
+  * so a faulting copy_from/to_guest can remap the argument pages. */
+ v->arch.hypercall_param.va = regs->r15;
+ v->arch.hypercall_param.pa1 = regs->r17;
+ v->arch.hypercall_param.pa2 = regs->r18;
+ set_xen_guest_handle(uop, (void *)regs->r15);
+ regs->r8 = do_grant_table_op(regs->r14, uop, regs->r16);
+ /* Clear va so a stale hint is never reused by a later hypercall. */
+ v->arch.hypercall_param.va = 0;
+ return IA64_NO_FAULT;
+ }
if (cmd < NR_hypercalls) {
perfc_incra(hypercalls, cmd);
return IA64_NO_FAULT;
}
+
+/*
+ * Re-insert the dtlb entry for the current hypercall's parameter area,
+ * using the machine addresses the guest passed in (hypercall_param.pa1/pa2).
+ * Called after a guest copy fails; returns TRUE if a mapping was inserted
+ * (so the copy can be retried), FALSE if there is nothing to map.
+ */
+int ia64_map_hypercall_param(void)
+{
+ struct vcpu *v = current;
+ struct domain *d = current->domain;
+ u64 vaddr = v->arch.hypercall_param.va & PAGE_MASK;
+ volatile pte_t* pte;
+
+ if (v->arch.hypercall_param.va == 0)
+ return FALSE;
+ pte = lookup_noalloc_domain_pte(d, v->arch.hypercall_param.pa1);
+ if (!pte || !pte_present(*pte))
+ return FALSE;
+ /* Insert a PAGE_SHIFT-sized entry for the first parameter page. */
+ vcpu_itc_no_srlz(v, 2, vaddr, pte_val(*pte), -1UL, PAGE_SHIFT);
+ if (v->arch.hypercall_param.pa2) {
+ /* Parameter area straddles a page boundary: map the second page too.
+  * A missing second pte is silently skipped; the retried copy will
+  * fault again in that case. */
+ vaddr += PAGE_SIZE;
+ pte = lookup_noalloc_domain_pte(d, v->arch.hypercall_param.pa2);
+ if (pte && pte_present(*pte)) {
+ vcpu_itc_no_srlz(v, 2, vaddr, pte_val(*pte),
+ -1UL, PAGE_SHIFT);
+ }
+ }
+ ia64_srlz_d();
+ return TRUE;
+}
(sizeof(vcpu_info_t) * (v)->vcpu_id + \
offsetof(vcpu_info_t, evtchn_upcall_mask))
+/* Guest-supplied hints for remapping a hypercall argument after a
+ * TLB flush; va == 0 means no hypercall parameter is in flight. */
+struct hypercall_param {
+ unsigned long va; /* guest virtual address of the argument buffer */
+ unsigned long pa1; /* machine address of the first backing page */
+ unsigned long pa2; /* machine address of the second page, or 0 */
+};
+
struct arch_vcpu {
/* Save the state of vcpu.
This is the first entry to speed up accesses. */
char irq_new_pending;
char irq_new_condition; // vpsr.i/vtpr change, check for pending VHPI
char hypercall_continuation;
+
+ struct hypercall_param hypercall_param; // used to remap a hypercall param
+
//for phycial emulation
unsigned long old_rsc;
int mode_flags;
extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
unsigned long count);
+extern int ia64_map_hypercall_param(void);
+
static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
- return __copy_user(to, (void __user *) from, count);
+ unsigned long len;
+ len = __copy_user(to, (void __user *)from, count);
+ if (len == 0)
+ return 0;
+ /* Copy faulted: the hypercall parameter mapping may have been
+  * flushed.  Re-insert it and retry once; otherwise report the
+  * uncopied byte count as usual. */
+ if (ia64_map_hypercall_param())
+ len = __copy_user(to, (void __user *)from, count); /* retry */
+ return len;
}
static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
- return __copy_user((void __user *) to, from, count);
+ unsigned long len;
+ len = __copy_user((void __user *)to, from, count);
+ if (len == 0)
+ return 0;
+ /* Copy faulted: remap the hypercall parameter pages and retry once
+  * (mirrors the __copy_to_user retry path). */
+ if (ia64_map_hypercall_param())
+ len = __copy_user((void __user *) to, from, count); /* retry */
+ return len;
}
#define __copy_to_user_inatomic __copy_to_user